/*
 * I/O permission bitmap, globally shared by all HVM guests.
 * Placed in .bss.page_aligned because the hardware consumes it as
 * page-aligned pages (3 pages' worth of bits).
 * NOTE(review): the old globally shared hvm_msr_bitmap that used to be
 * declared here has been removed — MSR bitmaps are now VMX-private and
 * allocated from the xenheap at VMX start-of-day (see vmx_msr_bitmap).
 */
char __attribute__ ((__section__ (".bss.page_aligned")))
    hvm_io_bitmap[3*PAGE_SIZE];
/*
 * Enable HVM support, installing the vendor-specific (VMX/SVM) function
 * table and initialising globally shared intercept state.
 *
 * @fns: vendor-provided HVM function table; copied by value into hvm_funcs.
 *
 * The previous global MSR-bitmap initialisation has been removed from here:
 * the MSR permission bitmap is now VMX-private, allocated and initialised
 * in the VMX start-up path instead.
 */
void hvm_enable(struct hvm_function_table *fns)
{
    /* Intercept all I/O port accesses by default (every bit set). */
    memset(hvm_io_bitmap, ~0, sizeof(hvm_io_bitmap));
    /* Allow direct guest access to port 0x80 — presumably the POST/debug
     * port, passed through for performance; TODO confirm rationale. */
    clear_bit(0x80, hvm_io_bitmap);

    hvm_funcs = *fns;
    hvm_enabled = 1;
}
v->arch.hvm_vcpu.u.vmx.exec_control = vmx_cpu_based_exec_control;
if ( cpu_has_vmx_msr_bitmap )
- __vmwrite(MSR_BITMAP, virt_to_maddr(hvm_msr_bitmap));
+ __vmwrite(MSR_BITMAP, virt_to_maddr(vmx_msr_bitmap));
/* I/O access bitmap. */
__vmwrite(IO_BITMAP_A, virt_to_maddr(hvm_io_bitmap));
#include <public/hvm/save.h>
#include <asm/hvm/trace.h>
/*
 * VMX MSR permission bitmap, shared by all VMX guests on this host.
 * NULL until VMX start-of-day, where it is allocated from the xenheap,
 * initialised to intercept all MSR accesses, and then selectively
 * opened up (e.g. for MSR_FS_BASE/MSR_GS_BASE) when the CPU supports
 * MSR bitmaps.
 */
char *vmx_msr_bitmap;
static void vmx_ctxt_switch_from(struct vcpu *v);
static void vmx_ctxt_switch_to(struct vcpu *v);
*/
if ( msr <= 0x1fff )
{
- __clear_bit(msr, hvm_msr_bitmap + 0x000); /* read-low */
- __clear_bit(msr, hvm_msr_bitmap + 0x800); /* write-low */
+ __clear_bit(msr, vmx_msr_bitmap + 0x000); /* read-low */
+ __clear_bit(msr, vmx_msr_bitmap + 0x800); /* write-low */
}
else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
{
msr &= 0x1fff;
- __clear_bit(msr, hvm_msr_bitmap + 0x400); /* read-high */
- __clear_bit(msr, hvm_msr_bitmap + 0xc00); /* write-high */
+ __clear_bit(msr, vmx_msr_bitmap + 0x400); /* read-high */
+ __clear_bit(msr, vmx_msr_bitmap + 0xc00); /* write-high */
}
}
if ( cpu_has_vmx_msr_bitmap )
{
printk("VMX: MSR intercept bitmap enabled\n");
+ vmx_msr_bitmap = alloc_xenheap_page();
+ BUG_ON(vmx_msr_bitmap == NULL);
+ memset(vmx_msr_bitmap, ~0, PAGE_SIZE);
disable_intercept_for_msr(MSR_FS_BASE);
disable_intercept_for_msr(MSR_GS_BASE);
}